# ---- Packages ----
library(tidyverse)    # data manipulation
library(ggpubr)       # exploratory plots / plot arrangement
library(modelsummary) # descriptive data tables
library(glmmTMB)      # generalised linear mixed models
library(DHARMa)       # simulation-based model diagnostics
library(performance)  # model diagnostics
library(ggeffects)    # partial-effect plots
library(car)          # Anova() on fitted models
library(emmeans)      # post-hoc analysis

# ---- Adult data: convert design/grouping variables to factors ----
# NOTE(review): in the flattened file this assignment was fused onto the end
# of the library(emmeans) comment above, so it could never run as written.
df_adults_cleaned <- df_adults |>
  mutate(across(c(FISH_ID, Sex, Population, Tank, Chamber,
                  System, Temperature, True_resting),
                factor))
# ---- Split adults by sex ----
df_males <- df_adults_cleaned |>
  filter(Sex == "M")
df_females <- df_adults_cleaned |>
  filter(Sex == "F")

# ---- Pair males and females that shared a tank ----
# full_join() keeps unpaired fish from either side; coalesce() then fills the
# identifying columns from whichever member of the pair is present.
df_adults_cleaned2 <- df_males |>
  full_join(select(df_females,
                   c("Tank", "Temperature", "Mass", "Resting",
                     "Max", "AAS", "FISH_ID", "Sex")),
            by = "Tank") |>
  mutate(Temperature.x = coalesce(Temperature.x, Temperature.y),
         FISH_ID.x     = coalesce(FISH_ID.x, FISH_ID.y),
         Sex.x         = coalesce(Sex.x, Sex.y),
         # pair-level (mid-parent) metabolic traits
         Resting.midpoint = (Resting.x + Resting.y) / 2,
         Max.midpoint     = (Max.x + Max.y) / 2,
         AAS.midpoint     = (AAS.x + AAS.y) / 2)

# ---- Harmonise population spellings in the juvenile data ----
# NOTE(review): this statement was fused onto the end of the mutate() call
# above in the flattened file (a syntax error); split back out here.
df_jresp$Population <- fct_collapse(df_jresp$Population,
  `Vlassof cay`    = c("Vlassof reef", "Vlassof", "Vlassof Cay", "Vlassof cay"),
  `Arlington reef` = c("Arlington reef", "Arlginton reef"))
# Collapse duplicate female IDs if needed (deliberately left disabled):
# df_jresp$Female <- fct_collapse(df_jresp$Female,
#   `CARL359` = c("CARL359", "CARL59"))

# Build the parental-pair identifier (F0 = "Male_Female") and convert the
# design variables to factors.
df_jresp2 <- df_jresp |>
  unite("F0", c("Male", "Female"), sep = "_", remove = FALSE) |>
  mutate(across(1:7, factor),
         across(c(Temperature, True_resting), factor))
# df_jresp2_rest <- df_jresp2 |>
#   filter(True_resting == "Y")

# ---- Attach parental phenotypes to the offspring records ----
# NOTE(review): in the flattened file this assignment was swallowed by the
# commented-out filter() line above, so temp2a was never created.
# Male (.x) parental traits:
temp2a <- temp1a |>
  left_join(select(df_adults_cleaned2,
                   c("FISH_ID.x", "Sex.x", "Resting.x",
                     "Max.x", "AAS.x", "Mass.x")),
            by = "FISH_ID.x")

# Female (.y) parental traits:
temp2b <- temp1b |>
  left_join(select(df_adults_cleaned2,
                   c("FISH_ID.y", "Sex.y", "Resting.y",
                     "Max.y", "AAS.y", "Mass.y")),
            by = "FISH_ID.y")

# Combine the male- and female-side tables by clutch and replicate.
df_merged <- temp2a |>
  left_join(select(temp2b,
                   c("Clutch", "Replicate", "FISH_ID.y",
                     "Resting.y", "Max.y", "AAS.y", "Mass.y")),
            by = c("Clutch", "Replicate"))

df <- df_merged |>
mutate(Resting_MALE = Resting.x,
         Max_MALE   = Max.x,
         AAS_MALE   = AAS.x,
         Mass_MALE  = Mass.x,
         # NOTE(review): this overwrites the female ID with the male ID while
         # the next line is a no-op, so the female FISH_ID is lost. If the
         # intent was to swap the .x/.y labels, both columns need swapping --
         # confirm before relying on FISH_ID.y downstream.
         FISH_ID.y  = FISH_ID.x, # makes more sense for males to be .y instead of .x
         FISH_ID.x  = FISH_ID.x,
         Resting_FEMALE = Resting.y,
         Max_FEMALE     = Max.y,
         AAS_FEMALE     = AAS.y,
         Mass_FEMALE    = Mass.y) |>
  # Mid-parent values (easier to recompute here than carry through the joins)
  mutate(Resting_MID = (Resting_MALE + Resting_FEMALE) / 2,
         Max_MID     = (Max_MALE + Max_FEMALE) / 2,
         AAS_MID     = (AAS_MALE + AAS_FEMALE) / 2) |>
  # Keep only true resting measurements (removes 10 individuals)
  filter(True_resting == "Y")

plot1 <- ggplot(df, aes(x = Resting_MALE, y = Resting, color = Temperature)) +
stat_smooth(method = "lm") +
  geom_point(alpha = 0.1) +
  ggtitle("Offspring-male relationship") +
  # NOTE(review): the axis labels were swapped relative to the aes() mapping
  # (x = parental trait, y = offspring trait); corrected here and below.
  xlab("") +
  ylab("Resting (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

plot2 <- ggplot(df, aes(x = Max_MALE, y = Max, color = Temperature)) +
  stat_smooth(method = "lm") +
  geom_point(alpha = 0.1) +
  ggtitle("Offspring-male relationship") +
  xlab("") +
  ylab("Max (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

plot3 <- ggplot(df, aes(x = AAS_MALE, y = AAS, color = Temperature)) +
  stat_smooth(method = "lm") +
  geom_point(alpha = 0.1) +
  ggtitle("Offspring-male relationship") +
  xlab("") +
  ylab("AAS (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

# Bottom row: regression lines only (points suppressed)
plot4 <- ggplot(df, aes(x = Resting_MALE, y = Resting, color = Temperature)) +
  stat_smooth(method = "lm") +
  # geom_point(alpha = 0.1) +
  ggtitle("Offspring-male relationship") +
  xlab("Resting (parental-male)") +
  ylab("Resting (offspring)") +
  theme_classic() +
  theme(legend.position = "bottom")

plot5 <- ggplot(df, aes(x = Max_MALE, y = Max, color = Temperature)) +
  stat_smooth(method = "lm") +
  # geom_point(alpha = 0.1) +
  ggtitle("Offspring-male relationship") +
  xlab("Max (parental-male)") +
  ylab("Max (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

plot6 <- ggplot(df, aes(x = AAS_MALE, y = AAS, color = Temperature)) +
  stat_smooth(method = "lm") +
  # geom_point(alpha = 0.1) +
  ggtitle("Offspring-male relationship") +
  xlab("AAS (parental-male)") +
  ylab("AAS (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

# Six plots in a 3-column grid (nrow = 3 leaves the last row empty)
ggarrange(plot1, plot2, plot3,
          plot4, plot5, plot6,
          ncol = 3,
          nrow = 3)

plot1 <- ggplot(df, aes(x = Resting_FEMALE, y = Resting, color = Temperature)) +
stat_smooth(method = "lm") +
  geom_point(alpha = 0.1) +
  ggtitle("Offspring-female relationship") +
  # NOTE(review): the axis labels were swapped relative to the aes() mapping
  # (x = parental trait, y = offspring trait); corrected here and below.
  xlab("") +
  ylab("Resting (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

plot2 <- ggplot(df, aes(x = Max_FEMALE, y = Max, color = Temperature)) +
  stat_smooth(method = "lm") +
  geom_point(alpha = 0.1) +
  ggtitle("Offspring-female relationship") +
  xlab("") +
  ylab("Max (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

plot3 <- ggplot(df, aes(x = AAS_FEMALE, y = AAS, color = Temperature)) +
  stat_smooth(method = "lm") +
  geom_point(alpha = 0.1) +
  ggtitle("Offspring-female relationship") +
  xlab("") +
  ylab("AAS (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

# Bottom row: regression lines only (points suppressed)
plot4 <- ggplot(df, aes(x = Resting_FEMALE, y = Resting, color = Temperature)) +
  stat_smooth(method = "lm") +
  # geom_point(alpha = 0.1) +
  ggtitle("Offspring-female relationship") +
  xlab("Resting (parental-female)") +
  ylab("Resting (offspring)") +
  theme_classic() +
  theme(legend.position = "bottom")

plot5 <- ggplot(df, aes(x = Max_FEMALE, y = Max, color = Temperature)) +
  stat_smooth(method = "lm") +
  # geom_point(alpha = 0.1) +
  ggtitle("Offspring-female relationship") +
  xlab("Max (parental-female)") +
  ylab("Max (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

plot6 <- ggplot(df, aes(x = AAS_FEMALE, y = AAS, color = Temperature)) +
  stat_smooth(method = "lm") +
  # geom_point(alpha = 0.1) +
  ggtitle("Offspring-female relationship") +
  xlab("AAS (parental-female)") +
  ylab("AAS (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

# Six plots in a 3-column grid (nrow = 3 leaves the last row empty)
ggarrange(plot1, plot2, plot3,
          plot4, plot5, plot6,
          ncol = 3,
          nrow = 3)

plot1 <- ggplot(df, aes(x = Resting_MID, y = Resting, color = Temperature)) +
stat_smooth(method = "lm") +
  geom_point(alpha = 0.1) +
  ggtitle("Offspring-midpoint relationship") +
  # NOTE(review): the axis labels were swapped relative to the aes() mapping
  # (x = mid-parent trait, y = offspring trait); corrected here and below.
  xlab("") +
  ylab("Resting (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

plot2 <- ggplot(df, aes(x = Max_MID, y = Max, color = Temperature)) +
  stat_smooth(method = "lm") +
  geom_point(alpha = 0.1) +
  ggtitle("Offspring-midpoint relationship") +
  xlab("") +
  ylab("Max (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

plot3 <- ggplot(df, aes(x = AAS_MID, y = AAS, color = Temperature)) +
  stat_smooth(method = "lm") +
  geom_point(alpha = 0.1) +
  ggtitle("Offspring-midpoint relationship") +
  xlab("") +
  ylab("AAS (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

# Bottom row: regression lines only (points suppressed)
plot4 <- ggplot(df, aes(x = Resting_MID, y = Resting, color = Temperature)) +
  stat_smooth(method = "lm") +
  # geom_point(alpha = 0.1) +
  ggtitle("Offspring-midpoint relationship") +
  xlab("Resting (parental-midpoint)") +
  ylab("Resting (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

plot5 <- ggplot(df, aes(x = Max_MID, y = Max, color = Temperature)) +
  stat_smooth(method = "lm") +
  # geom_point(alpha = 0.1) +
  ggtitle("Offspring-midpoint relationship") +
  xlab("Max (parental-midpoint)") +
  ylab("Max (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

plot6 <- ggplot(df, aes(x = AAS_MID, y = AAS, color = Temperature)) +
  stat_smooth(method = "lm") +
  # geom_point(alpha = 0.1) +
  ggtitle("Offspring-midpoint relationship") +
  xlab("AAS (parental-midpoint)") +
  ylab("AAS (offspring)") +
  theme_classic() +
  theme(legend.position = "none")

# Shared legend across panels (all six use the same Temperature colours)
ggarrange(plot1, plot2, plot3,
          plot4, plot5, plot6,
          ncol = 3,
          nrow = 3,
          common.legend = TRUE)

| Population | 27 | 28.5 | 30 |
|---|---|---|---|
| Arlington reef | 58 | 42 | 52 |
| Pretty patches | 25 | 21 | 34 |
| Sudbury reef | 26 | 15 | 16 |
| Vlassof cay | 25 | 10 | 25 |
| F0 | 27 | 28.5 | 30 |
|---|---|---|---|
| CARL217_CARL226 | 0 | 8 | 0 |
| CARL218_CARL222 | 0 | 0 | 13 |
| CARL230_CARL235 | 12 | 0 | 0 |
| CARL233_CARL215 | 0 | 0 | 8 |
| CARL237_CARL219 | 10 | 0 | 0 |
| CARL241_CARL239 | 15 | 0 | 0 |
| CARL249_CARL360 | 0 | 0 | 8 |
| CARL335_CARL359 | 0 | 13 | 0 |
| CARL338_CARL345 | 0 | 8 | 0 |
| CARL344_CARL370 | 0 | 0 | 15 |
| CARL354_CARL355 | 21 | 0 | 0 |
| CARL360_CARL249 | 0 | 0 | 8 |
| CARL367_CARL363 | 0 | 7 | 0 |
| CARL369_CARL349 | 0 | 6 | 0 |
| CPRE189_CPRE202 | 0 | 0 | 15 |
| CPRE372_CPRE209 | 6 | 0 | 0 |
| CPRE372_CPRE370 | 8 | 0 | 0 |
| CPRE375_CPRE377 | 11 | 0 | 0 |
| CPRE391_CPRE390 | 0 | 0 | 6 |
| CPRE447_CPRE452 | 0 | 0 | 13 |
| CPRE453_CPRE459 | 0 | 7 | 0 |
| CPRE521_CPRE524 | 0 | 7 | 0 |
| CPRE550_CPRE533 | 0 | 7 | 0 |
| CSUD002_CSUD213 | 0 | 8 | 0 |
| CSUD009_CSUD212 | 13 | 0 | 0 |
| CSUD013_CSUD017 | 13 | 0 | 0 |
| CSUD016_CSUD078 | 0 | 7 | 0 |
| CSUD312_CSUD304 | 0 | 0 | 16 |
| CVLA049_CVLA098 | 0 | 10 | 0 |
| CVLA089_CVLA059 | 0 | 0 | 7 |
| CVLA102_CVLA466 | 6 | 0 | 0 |
| CVLA106_CVLA091 | 0 | 0 | 12 |
| CVLA468_CVLA477 | 12 | 0 | 0 |
| CVLA486_CVLA463 | 7 | 0 | 0 |
| CVLA498_CVLA493 | 0 | 0 | 6 |
| Temperature | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 132 | 0.21 | 0.21 | 0.08 | 0.64 | 0.08 | ▃▆▇▆▁▁ |
| 28.5 | 88 | 0.24 | 0.23 | 0.09 | 0.47 | 0.08 | ▂▄▆▆▇▄▁▁ ▁ |
| 30 | 127 | 0.23 | 0.23 | 0.06 | 0.40 | 0.07 | ▁▂▃▅▇▅▅▄▁▁ |
| Temperature | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 127 | 0.58 | 0.55 | 0.27 | 1.66 | 0.19 | ▄▇▆▃▂ |
| 28.5 | 84 | 0.66 | 0.64 | 0.24 | 1.08 | 0.18 | ▂▅▇▇▇▃▄▂▂ |
| 30 | 126 | 0.65 | 0.64 | 0.16 | 1.29 | 0.19 | ▂▃▇▅▆▃▁ |
| Temperature | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 125 | 0.37 | 0.34 | 0.14 | 1.02 | 0.14 | ▃▇▇▄▃▂ |
| 28.5 | 84 | 0.42 | 0.40 | 0.10 | 0.79 | 0.15 | ▁▃▅▇▅▃▅▃▁▁ |
| 30 | 126 | 0.42 | 0.41 | 0.09 | 0.99 | 0.15 | ▁▄▆▄▇▃▁ |
| Population | 27 | 28.5 | 30 |
|---|---|---|---|
| Arlington reef | 8 | 7 | 4 |
| Pretty patches | 4 | 6 | 4 |
| Sudbury reef | 4 | 3 | 2 |
| Vlassof cay | 6 | 2 | 5 |
# Counts of adults by population, temperature, and sex
datasummary(Factor(Population) ~ Factor(Temperature) * Factor(Sex),
            data = df_adults_cleaned,
            fmt = "%.0f")

| 27 | 28.5 | 30 | ||||
|---|---|---|---|---|---|---|
| Population | F | M | F | M | F | M |
| Arlington reef | 4 | 4 | 2 | 5 | 2 | 2 |
| Pretty patches | 2 | 2 | 3 | 3 | 3 | 1 |
| Sudbury reef | 2 | 2 | 1 | 2 | 1 | 1 |
| Vlassof cay | 3 | 3 | 1 | 1 | 3 | 2 |
Pairs
# Number of unique breeding pairs per population x temperature
datasummary(Factor(Population) * Factor(Temperature.x) ~ AAS.midpoint * (NUnique),
            data = df_adults_cleaned2,
            fmt = "%.0f")

| Population | Temperature.x | NUnique |
|---|---|---|
| Arlington reef | 27 | 3 |
| 28.5 | 2 | |
| 30 | 2 | |
| Pretty patches | 27 | 2 |
| 28.5 | 3 | |
| 30 | 1 | |
| Sudbury reef | 27 | 2 |
| 28.5 | 2 | |
| 30 | 1 | |
| Vlassof cay | 27 | 3 |
| 28.5 | 1 | |
| 30 | 2 |
| Temperature | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 22 | 6.29 | 6.06 | 3.82 | 10.09 | 1.56 | ▂▂▃▇▂▂▁▁▁▁ |
| 28.5 | 18 | 6.49 | 6.96 | 4.35 | 8.49 | 1.45 | ▇▂▅▂▃▃▅▃ |
| 30 | 15 | 7.29 | 7.20 | 5.14 | 9.15 | 1.46 | ▅▂▇▂▂▂▇▇ |
| Temperature | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 22 | 16.58 | 16.91 | 9.70 | 22.06 | 3.36 | ▃▃▅▅▃▃▅▇▂ |
| 28.5 | 18 | 17.09 | 17.23 | 11.04 | 28.39 | 3.94 | ▅▂▅▇▇▃▂ |
| 30 | 15 | 16.48 | 16.83 | 11.78 | 21.24 | 2.82 | ▂▃▂▃▇▂▂▃▂ |
| Temperature | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 22 | 10.29 | 10.26 | 3.85 | 16.28 | 3.14 | ▃▁▄▇▃▆▃▃▁ |
| 28.5 | 18 | 10.59 | 9.66 | 6.11 | 20.44 | 3.66 | ▅▅▇▇▃▂▂ |
| 30 | 15 | 9.19 | 9.16 | 4.36 | 12.77 | 2.91 | ▃▂▅▂▂▂▃▇ |
# ---- Candidate random-effect structures for offspring resting rate ----
# Increasingly complex random-intercept models; compare via AIC/BIC to pick
# the structure carried forward into the fixed-effect models.
model1 <- glmmTMB(Resting ~ (1 | Clutch),
                  family = "gaussian",
                  data = df)
model2 <- glmmTMB(Resting ~ (1 | Clutch) + (1 | Population),
                  family = "gaussian",
                  data = df)
model3 <- glmmTMB(Resting ~ (1 | Clutch) + (1 | Chamber),
                  family = "gaussian",
                  data = df)
model4 <- glmmTMB(Resting ~ (1 | Clutch) + (1 | Population) + (1 | Chamber),
                  family = "gaussian",
                  data = df)

After figuring out which random factors will be incorporated into the model we will start to examine our fixed factors. Some fixed factors such as Resting_(FE)MALE and TEMPERATURE will be essential to answering questions we have around heritability. Another factor that will be included is Dry_mass - which, it should be pointed out, in this experiment refers to the mass of fish after they were blotted dry with paper towel rather than completely dried out. Larger fish consume more oxygen; therefore, we need to account for this known relationship within our model. Our model will look something like this:
If we had alternative hypotheses to test, we would do so at this stage. But in this instance the experiment was designed to answer a specific question by limiting potential covariates.
# Male-parent model: offspring resting rate ~ male resting rate x temperature,
# with (blotted-dry) mass as a covariate and clutch as a random intercept.
model1.1 <- glmmTMB(Resting ~ Resting_MALE * Temperature + scale(Dry_mass) + (1 | Clutch),
                    family = "gaussian",
                    data = df)

Great, now let's check how our model performed via model validation techniques.
To check our model's performance we will be using two different packages that perform model diagnostics. The packages used here are just examples; there are other packages out there that provide the same functionality.
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.164 0.292 0.04 0.024 0.424 0.496 0.132 0.376 0.42 0.308 0.244 0.144 0.284 0.072 0.116 0.132 0.392 0.956 0.272 0.052 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.046101, p-value = 0.5959
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0029, p-value = 0.944
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 3, observations = 278, p-value = 0.4897
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.002230992 0.031211303
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.01079137
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.046101, p-value = 0.5959
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0029, p-value = 0.944
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 3, observations = 278, p-value = 0.4897
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.002230992 0.031211303
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.01079137
## Family: gaussian ( identity )
## Formula:
## Resting ~ Resting_MALE * Temperature + scale(Dry_mass) + (1 | Clutch)
## Data: df
##
## AIC BIC logLik deviance df.resid
## -856.7 -824.1 437.4 -874.7 269
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Clutch (Intercept) 0.0004392 0.02096
## Residual 0.0022212 0.04713
## Number of obs: 278, groups: Clutch, 42
##
## Dispersion estimate for gaussian family (sigma^2): 0.00222
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.189873 0.025074 7.573 3.66e-14 ***
## Resting_MALE 0.003234 0.003800 0.851 0.3947
## Temperature28.5 0.005710 0.045607 0.125 0.9004
## Temperature30 0.085904 0.050048 1.716 0.0861 .
## scale(Dry_mass) 0.048009 0.003337 14.389 < 2e-16 ***
## Resting_MALE:Temperature28.5 0.002259 0.007202 0.314 0.7538
## Resting_MALE:Temperature30 -0.008442 0.007386 -1.143 0.2530
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 0.140729734 0.23901680 0.189873265
## Resting_MALE -0.004213063 0.01068137 0.003234153
## Temperature28.5 -0.083677409 0.09509772 0.005710154
## Temperature30 -0.012188071 0.18399674 0.085904336
## scale(Dry_mass) 0.041469218 0.05454820 0.048008710
## Resting_MALE:Temperature28.5 -0.011857204 0.01637589 0.002259342
## Resting_MALE:Temperature30 -0.022919384 0.00603441 -0.008442487
## Std.Dev.(Intercept)|Clutch 0.014130331 0.03108372 0.020957653
# Pairwise temperature contrasts (Sidak-adjusted). Temperature is involved in
# an interaction, so these marginal means average over Resting_MALE (emmeans
# warns about this below).
model1.1 |>
  emmeans(pairwise ~ Temperature, type = "response") |>
  summary(by = NULL, adjust = "sidak", infer = TRUE)

## NOTE: Results may be misleading due to involvement in interactions
## $emmeans
## Temperature emmean SE df lower.CL upper.CL t.ratio p.value
## 27 0.213 0.00619 269 0.199 0.228 34.495 <.0001
## 28.5 0.234 0.00851 269 0.213 0.254 27.432 <.0001
## 30 0.246 0.00915 269 0.224 0.268 26.872 <.0001
##
## Confidence level used: 0.95
## Conf-level adjustment: sidak method for 3 estimates
## P value adjustment: sidak method for 3 tests
##
## $contrasts
## contrast estimate SE df lower.CL upper.CL t.ratio
## Temperature27 - Temperature28.5 -0.0201 0.0105 269 -0.0453 0.00520 -1.908
## Temperature27 - Temperature30 -0.0323 0.0111 269 -0.0588 -0.00571 -2.919
## Temperature28.5 - Temperature30 -0.0122 0.0126 269 -0.0425 0.01801 -0.971
## p.value
## 0.1626
## 0.0114
## 0.7024
##
## Confidence level used: 0.95
## Conf-level adjustment: sidak method for 3 estimates
## P value adjustment: sidak method for 3 tests
# Partial-effect grid: predicted offspring resting rate across the observed
# range of male resting rate, at each temperature.
om.rest <- emmeans(model1.1, ~ Resting_MALE * Temperature,
                   at = list(Resting_MALE = seq(from = 4, to = 10, by = .1)))
om.rest.df <- as.data.frame(om.rest)

# Observed values with fixed-effect-only predictions (re.form = NA) and
# response-scale residuals
om.rest.obs <- drop_na(df, Resting_MALE, Resting) |>
  mutate(Pred  = predict(model1.1, re.form = NA, type = 'response'),
         Resid = residuals(model1.1, type = "response"),
         Fit   = Pred + Resid)

# Clutch-level means and 95% t-based confidence intervals
om.rest.obs.summarize <- om.rest.obs |>
  group_by(Clutch, Temperature) |>
  summarise(mean.rest      = mean(Resting, na.rm = TRUE),
            mean.rest_male = mean(Resting_MALE, na.rm = TRUE),
            sd.rest        = sd(Resting, na.rm = TRUE),
            n.rest         = n()) |>
  mutate(se.rest       = sd.rest / sqrt(n.rest),
         lower.ci.rest = mean.rest - qt(1 - (0.05 / 2), n.rest - 1) * se.rest,
         upper.ci.rest = mean.rest + qt(1 - (0.05 / 2), n.rest - 1) * se.rest) |>
  ungroup()

## `summarise()` has grouped output by 'Clutch'. You can override using the
## `.groups` argument.
# Overlay model-predicted trends on clutch-level observed means (+/- 95% CI)
ggplot(data = om.rest.df, aes(y = emmean, x = Resting_MALE)) +
  stat_smooth(aes(color = Temperature),
              method = "lm") +
  geom_pointrange(data = om.rest.obs.summarize,
                  aes(y = mean.rest, x = mean.rest_male,
                      ymin = lower.ci.rest,
                      ymax = upper.ci.rest, color = Temperature),
                  alpha = 0.2) +
  facet_wrap(~Temperature) +
  theme_classic() +
  theme(legend.position = "bottom")

## `geom_smooth()` using formula = 'y ~ x'
# Female-parent model: offspring resting rate ~ female resting rate x
# temperature, with mass covariate and clutch random intercept.
fmodel1.1 <- glmmTMB(Resting ~ Resting_FEMALE * Temperature + scale(Dry_mass) + (1 | Clutch),
                     family = "gaussian",
                     data = df)

Great, now let's check how our model performed via model validation techniques.
To check our model's performance we will be using two different packages that perform model diagnostics. The packages used here are just examples; there are other packages out there that provide the same functionality.
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.408 0.484 0.264 0.236 0.156 0.264 0.056 0.132 0.084 0.476 0.964 0.288 0.092 0.88 0.224 0.732 0.284 0.224 0.252 0.236 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.063704, p-value = 0.2232
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0001, p-value = 0.928
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 3, observations = 270, p-value = 0.4797
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.002297266 0.032126194
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.01111111
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.063704, p-value = 0.2232
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0001, p-value = 0.928
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 3, observations = 270, p-value = 0.4797
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.002297266 0.032126194
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.01111111
## Family: gaussian ( identity )
## Formula:
## Resting ~ Resting_FEMALE * Temperature + scale(Dry_mass) + (1 | Clutch)
## Data: df
##
## AIC BIC logLik deviance df.resid
## -829.3 -796.9 423.6 -847.3 261
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Clutch (Intercept) 0.0003942 0.01985
## Residual 0.0022631 0.04757
## Number of obs: 270, groups: Clutch, 41
##
## Dispersion estimate for gaussian family (sigma^2): 0.00226
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.199816 0.030932 6.460 1.05e-10 ***
## Resting_FEMALE 0.002294 0.005290 0.434 0.665
## Temperature28.5 -0.034122 0.065759 -0.519 0.604
## Temperature30 -0.009355 0.055819 -0.168 0.867
## scale(Dry_mass) 0.049633 0.003453 14.372 < 2e-16 ***
## Resting_FEMALE:Temperature28.5 0.006726 0.009713 0.693 0.489
## Resting_FEMALE:Temperature30 0.003702 0.007863 0.471 0.638
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 0.139191277 0.26044089 0.199816085
## Resting_FEMALE -0.008074907 0.01266293 0.002294012
## Temperature28.5 -0.163007674 0.09476402 -0.034121827
## Temperature30 -0.118757206 0.10004756 -0.009354823
## scale(Dry_mass) 0.042863948 0.05640139 0.049632667
## Resting_FEMALE:Temperature28.5 -0.012310451 0.02576279 0.006726171
## Resting_FEMALE:Temperature30 -0.011708626 0.01911294 0.003702157
## Std.Dev.(Intercept)|Clutch 0.012980602 0.03036514 0.019853406
# Partial-effect grid for the female-parent model: predicted offspring
# resting rate across the observed range of female resting rate.
om.rest <- emmeans(fmodel1.1, ~ Resting_FEMALE * Temperature,
                   at = list(Resting_FEMALE = seq(from = 4, to = 10, by = .1)))
om.rest.df <- as.data.frame(om.rest)

# Observed values with fixed-effect-only predictions and residuals
om.rest.obs <- drop_na(df, Resting_FEMALE, Resting) |>
  mutate(Pred  = predict(fmodel1.1, re.form = NA, type = 'response'),
         Resid = residuals(fmodel1.1, type = "response"),
         Fit   = Pred + Resid)

# Clutch-level means and 95% t-based confidence intervals
om.rest.obs.summarize <- om.rest.obs |>
  group_by(Clutch, Temperature) |>
  summarise(mean.rest        = mean(Resting, na.rm = TRUE),
            mean.rest_female = mean(Resting_FEMALE, na.rm = TRUE),
            sd.rest          = sd(Resting, na.rm = TRUE),
            n.rest           = n()) |>
  mutate(se.rest       = sd.rest / sqrt(n.rest),
         lower.ci.rest = mean.rest - qt(1 - (0.05 / 2), n.rest - 1) * se.rest,
         upper.ci.rest = mean.rest + qt(1 - (0.05 / 2), n.rest - 1) * se.rest) |>
  ungroup()

## `summarise()` has grouped output by 'Clutch'. You can override using the
## `.groups` argument.
# Overlay model-predicted trends on clutch-level observed means (+/- 95% CI)
ggplot(data = om.rest.df, aes(y = emmean, x = Resting_FEMALE)) +
  stat_smooth(aes(color = Temperature),
              method = "lm") +
  geom_pointrange(data = om.rest.obs.summarize,
                  aes(y = mean.rest, x = mean.rest_female,
                      ymin = lower.ci.rest,
                      ymax = upper.ci.rest, color = Temperature),
                  alpha = 0.2) +
  facet_wrap(~Temperature) +
  theme_classic() +
  theme(legend.position = "bottom")

## `geom_smooth()` using formula = 'y ~ x'
# Mid-parent model: offspring resting rate ~ mid-parent resting rate x
# temperature, with mass covariate and clutch random intercept.
mid_model1.1 <- glmmTMB(Resting ~ Resting_MID * Temperature + scale(Dry_mass) + (1 | Clutch),
                        family = "gaussian",
                        data = df)

Great, now let's check how our model performed via model validation techniques.
To check our model's performance we will be using two different packages that perform model diagnostics. The packages used here are just examples; there are other packages out there that provide the same functionality.
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.34 0.424 0.292 0.196 0.14 0.26 0.076 0.112 0.14 0.376 0.952 0.28 0.076 0.804 0.192 0.588 0.228 0.2 0.284 0.216 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.053223, p-value = 0.4994
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0059, p-value = 0.936
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 4, observations = 242, p-value = 0.1294
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.004521472 0.041777518
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.01652893
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.053223, p-value = 0.4994
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0059, p-value = 0.936
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 4, observations = 242, p-value = 0.1294
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.004521472 0.041777518
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.01652893
## Family: gaussian ( identity )
## Formula: Resting ~ Resting_MID * Temperature + scale(Dry_mass) + (1 |
## Clutch)
## Data: df
##
## AIC BIC logLik deviance df.resid
## -734.6 -703.2 376.3 -752.6 233
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Clutch (Intercept) 0.0004201 0.02050
## Residual 0.0023198 0.04816
## Number of obs: 242, groups: Clutch, 37
##
## Dispersion estimate for gaussian family (sigma^2): 0.00232
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.1925660 0.0299525 6.429 1.28e-10 ***
## Resting_MID 0.0033761 0.0048251 0.700 0.484
## Temperature28.5 -0.0430306 0.0711832 -0.605 0.546
## Temperature30 0.0209171 0.0769909 0.272 0.786
## scale(Dry_mass) 0.0488705 0.0037304 13.100 < 2e-16 ***
## Resting_MID:Temperature28.5 0.0089635 0.0109691 0.817 0.414
## Resting_MID:Temperature30 0.0004924 0.0109707 0.045 0.964
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 0.133860206 0.25127185 0.1925660287
## Resting_MID -0.006080937 0.01283311 0.0033760851
## Temperature28.5 -0.182547042 0.09648582 -0.0430306132
## Temperature30 -0.129982258 0.17181656 0.0209171490
## scale(Dry_mass) 0.041558977 0.05618203 0.0488705046
## Resting_MID:Temperature28.5 -0.012535456 0.03046243 0.0089634891
## Resting_MID:Temperature30 -0.021009706 0.02199453 0.0004924099
## Std.Dev.(Intercept)|Clutch 0.013160323 0.03191940 0.0204955981
# Partial-effect grid for the mid-parent model: predicted offspring resting
# rate across the observed range of mid-parent resting rate.
om.rest <- emmeans(mid_model1.1, ~ Resting_MID * Temperature,
                   at = list(Resting_MID = seq(from = 4, to = 10, by = .1)))
om.rest.df <- as.data.frame(om.rest)

# Observed values with fixed-effect-only predictions and residuals
om.rest.obs <- drop_na(df, Resting_MID, Resting) |>
  mutate(Pred  = predict(mid_model1.1, re.form = NA, type = 'response'),
         Resid = residuals(mid_model1.1, type = "response"),
         Fit   = Pred + Resid)

# Clutch-level means and 95% t-based confidence intervals.
# NOTE(review): mean.rest_female here actually holds the MIDPOINT trait
# (copy-over from the female section); the name is kept because the plot
# below references it -- consider renaming both together.
om.rest.obs.summarize <- om.rest.obs |>
  group_by(Clutch, Temperature) |>
  summarise(mean.rest        = mean(Resting, na.rm = TRUE),
            mean.rest_female = mean(Resting_MID, na.rm = TRUE),
            sd.rest          = sd(Resting, na.rm = TRUE),
            n.rest           = n()) |>
  mutate(se.rest       = sd.rest / sqrt(n.rest),
         lower.ci.rest = mean.rest - qt(1 - (0.05 / 2), n.rest - 1) * se.rest,
         upper.ci.rest = mean.rest + qt(1 - (0.05 / 2), n.rest - 1) * se.rest) |>
  ungroup()

## `summarise()` has grouped output by 'Clutch'. You can override using the
## `.groups` argument.
# Overlay model-predicted trends on clutch-level observed means (+/- 95% CI).
# (mean.rest_female holds the midpoint trait -- see note where it is built.)
ggplot(data = om.rest.df, aes(y = emmean, x = Resting_MID)) +
  stat_smooth(aes(color = Temperature),
              method = "lm") +
  geom_pointrange(data = om.rest.obs.summarize,
                  aes(y = mean.rest, x = mean.rest_female,
                      ymin = lower.ci.rest,
                      ymax = upper.ci.rest, color = Temperature),
                  alpha = 0.2) +
  facet_wrap(~Temperature) +
  theme_classic() +
  theme(legend.position = "bottom")

## `geom_smooth()` using formula = 'y ~ x'